This workbook was created using the ‘dataexpks’ template:
https://github.com/DublinLearningGroup/dataexpks
This workbook performs the basic data exploration of the dataset.
# Categorical variables with at least this many levels are excluded from plots.
dataexp_level_exclusion_threshold <- 100
# Maximum number of levels shown per categorical; rarer levels are lumped.
dataexp_cat_level_count <- 40
# Number of bins used for every histogram in this workbook.
dataexp_hist_bins_count <- 50
First we load the dataset.
### _TEMPLATE_
### Data is loaded into dataset rawdata_tbl here
### We may wish to set column types explicitly; an empty cols() spec lets
### readr guess the type of each column.
data_col_types <- cols(
# VAR1 = col_character()
# ,VAR2 = col_date()
# ,VAR3 = col_number()
)
### Data is loaded into dataset rawdata_tbl here
# The file is semicolon-delimited (UCI bank-marketing export format).
rawdata_tbl <- read_delim('data/bank-full.csv'
,delim = ';'
,locale = locale()
,col_types = data_col_types
,progress = FALSE
)
glimpse(rawdata_tbl)
## Observations: 45,211
## Variables: 17
## $ age <int> 58, 44, 33, 47, 33, 35, 28, 42, 58, 43, 41, 29, 53, 58, 57, 51, 45,...
## $ job <chr> "management", "technician", "entrepreneur", "blue-collar", "unknown...
## $ marital <chr> "married", "single", "married", "married", "single", "married", "si...
## $ education <chr> "tertiary", "secondary", "secondary", "unknown", "unknown", "tertia...
## $ default <chr> "no", "no", "no", "no", "no", "no", "no", "yes", "no", "no", "no", ...
## $ balance <int> 2143, 29, 2, 1506, 1, 231, 447, 2, 121, 593, 270, 390, 6, 71, 162, ...
## $ housing <chr> "yes", "yes", "yes", "yes", "no", "yes", "yes", "yes", "yes", "yes"...
## $ loan <chr> "no", "no", "yes", "no", "no", "no", "yes", "no", "no", "no", "no",...
## $ contact <chr> "unknown", "unknown", "unknown", "unknown", "unknown", "unknown", "...
## $ day <int> 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5...
## $ month <chr> "may", "may", "may", "may", "may", "may", "may", "may", "may", "may...
## $ duration <int> 261, 151, 76, 92, 198, 139, 217, 380, 50, 55, 222, 137, 517, 71, 17...
## $ campaign <int> 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
## $ pdays <int> -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,...
## $ previous <int> 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
## $ poutcome <chr> "unknown", "unknown", "unknown", "unknown", "unknown", "unknown", "...
## $ y <chr> "no", "no", "no", "no", "no", "no", "no", "no", "no", "no", "no", "...
### _TEMPLATE_
### Do simple datatype transforms and save output in data_tbl
# Copy the raw data and standardise the column names via clean_names().
data_tbl <- rawdata_tbl
names(data_tbl) <- clean_names(names(rawdata_tbl))
glimpse(data_tbl)
## Observations: 45,211
## Variables: 17
## $ age <int> 58, 44, 33, 47, 33, 35, 28, 42, 58, 43, 41, 29, 53, 58, 57, 51, 45,...
## $ job <chr> "management", "technician", "entrepreneur", "blue-collar", "unknown...
## $ marital <chr> "married", "single", "married", "married", "single", "married", "si...
## $ education <chr> "tertiary", "secondary", "secondary", "unknown", "unknown", "tertia...
## $ default <chr> "no", "no", "no", "no", "no", "no", "no", "yes", "no", "no", "no", ...
## $ balance <int> 2143, 29, 2, 1506, 1, 231, 447, 2, 121, 593, 270, 390, 6, 71, 162, ...
## $ housing <chr> "yes", "yes", "yes", "yes", "no", "yes", "yes", "yes", "yes", "yes"...
## $ loan <chr> "no", "no", "yes", "no", "no", "no", "yes", "no", "no", "no", "no",...
## $ contact <chr> "unknown", "unknown", "unknown", "unknown", "unknown", "unknown", "...
## $ day <int> 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5...
## $ month <chr> "may", "may", "may", "may", "may", "may", "may", "may", "may", "may...
## $ duration <int> 261, 151, 76, 92, 198, 139, 217, 380, 50, 55, 222, 137, 517, 71, 17...
## $ campaign <int> 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
## $ pdays <int> -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,...
## $ previous <int> 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
## $ poutcome <chr> "unknown", "unknown", "unknown", "unknown", "unknown", "unknown", "...
## $ y <chr> "no", "no", "no", "no", "no", "no", "no", "no", "no", "no", "no", "...
We now create derived features useful for modelling. These values are new variables calculated from existing variables in the data.
# Build a proper Date from the month name and day-of-month columns. The year
# is not recorded in the data, so a placeholder year of 2000 is used (a leap
# year, so a 29 Feb value would still parse).
# NOTE(review): '%b' month-name parsing is locale-dependent — this assumes an
# English locale; confirm against the locale() used at load time.
data_tbl <- data_tbl %>%
mutate(date = sprintf('2000-%s-%02d', month, day) %>% as.Date('%Y-%b-%d'))
data_tbl %>% glimpse()
## Observations: 45,211
## Variables: 18
## $ age <int> 58, 44, 33, 47, 33, 35, 28, 42, 58, 43, 41, 29, 53, 58, 57, 51, 45,...
## $ job <chr> "management", "technician", "entrepreneur", "blue-collar", "unknown...
## $ marital <chr> "married", "single", "married", "married", "single", "married", "si...
## $ education <chr> "tertiary", "secondary", "secondary", "unknown", "unknown", "tertia...
## $ default <chr> "no", "no", "no", "no", "no", "no", "no", "yes", "no", "no", "no", ...
## $ balance <int> 2143, 29, 2, 1506, 1, 231, 447, 2, 121, 593, 270, 390, 6, 71, 162, ...
## $ housing <chr> "yes", "yes", "yes", "yes", "no", "yes", "yes", "yes", "yes", "yes"...
## $ loan <chr> "no", "no", "yes", "no", "no", "no", "yes", "no", "no", "no", "no",...
## $ contact <chr> "unknown", "unknown", "unknown", "unknown", "unknown", "unknown", "...
## $ day <int> 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5...
## $ month <chr> "may", "may", "may", "may", "may", "may", "may", "may", "may", "may...
## $ duration <int> 261, 151, 76, 92, 198, 139, 217, 380, 50, 55, 222, 137, 517, 71, 17...
## $ campaign <int> 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1...
## $ pdays <int> -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,...
## $ previous <int> 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...
## $ poutcome <chr> "unknown", "unknown", "unknown", "unknown", "unknown", "unknown", "...
## $ y <chr> "no", "no", "no", "no", "no", "no", "no", "no", "no", "no", "no", "...
## $ date <date> 2000-05-05, 2000-05-05, 2000-05-05, 2000-05-05, 2000-05-05, 2000-0...
Before we do anything with the data, we first check for missing values in the dataset. In some cases, missing data is coded by a special character rather than as a blank, so we first correct for this.
### _TEMPLATE_
### ADD CODE TO CORRECT FOR DATA ENCODING HERE
With missing data properly encoded, we now visualise the missing data in a number of different ways.
We first examine a simple univariate count of all the missing data:
# Total row count, used to convert missing counts into proportions.
row_count <- data_tbl %>% nrow

# Count the missing values per column and convert to a proportion.
# across()/pivot_longer() replace the deprecated funs()/gather() idioms.
missing_univariate_tbl <- data_tbl %>%
  summarise(across(everything(), ~ sum(is.na(.x)))) %>%
  pivot_longer(everything()
              ,names_to  = 'variable'
              ,values_to = 'missing_count') %>%
  mutate(missing_prop = missing_count / row_count)

# Barplot of the missing-value proportion per variable, worst first.
ggplot(missing_univariate_tbl) +
  geom_bar(aes(x = fct_reorder(variable, -missing_prop), weight = missing_prop)) +
  scale_y_continuous(labels = comma) +
  xlab("Variable") +
  ylab("Missing Value Proportion") +
  theme(axis.text.x = element_text(angle = 90))
We remove all variables where all of the entries are missing
# Variables whose missing count equals the row count are entirely missing.
remove_vars <- missing_univariate_tbl %>%
  filter(missing_count == row_count) %>%
  pull(variable)

# Drop the all-missing variables from the dataset.
lessmiss_data_tbl <- data_tbl %>%
  dplyr::select(-one_of(remove_vars))
With these columns removed, we repeat the exercise.
# Recompute missing-value proportions on the reduced dataset.
# across()/pivot_longer() replace the deprecated funs()/gather() idioms.
missing_univariate_tbl <- lessmiss_data_tbl %>%
  summarise(across(everything(), ~ sum(is.na(.x)))) %>%
  pivot_longer(everything()
              ,names_to  = 'variable'
              ,values_to = 'missing_count') %>%
  mutate(missing_prop = missing_count / row_count)

# Barplot of the missing-value proportion per remaining variable.
ggplot(missing_univariate_tbl) +
  geom_bar(aes(x = fct_reorder(variable, -missing_prop), weight = missing_prop)) +
  scale_y_continuous(labels = comma) +
  xlab("Variable") +
  ylab("Missing Value Proportion") +
  theme(axis.text.x = element_text(angle = 90))
To reduce the scale of this plot, we look at the top fifty missing data counts.
# Keep the variables with the largest missing counts (ties retained).
# slice_max() replaces the superseded arrange() + top_n() combination.
missing_univariate_top_tbl <- missing_univariate_tbl %>%
  slice_max(missing_count, n = 50)

# Barplot of the subset, worst first.
ggplot(missing_univariate_top_tbl) +
  geom_bar(aes(x = fct_reorder(variable, -missing_prop), weight = missing_prop)) +
  scale_y_continuous(labels = comma) +
  xlab("Variable") +
  ylab("Missing Value Proportion") +
  theme(axis.text.x = element_text(angle = 90))
It is useful to get an idea of what combinations of variables tend to have variables with missing values simultaneously, so to construct a visualisation for this we create a count of all the times given combinations of variables have missing values, producing a heat map for these combination counts.
# Build a heat map of which variables tend to be missing simultaneously.
# Each data row is reduced to a 0/1 missingness pattern; identical patterns
# are grouped and counted, then reshaped for plotting.
missing_plot_tbl <- rawdata_tbl %>%
mutate_all(funs(is.na)) %>%                # TRUE wherever a value is missing
mutate_all(funs(as.numeric)) %>%           # TRUE/FALSE -> 1/0
mutate(label = do.call(paste0, (.))) %>%   # pattern string, e.g. "00100...0"
group_by(label) %>%
summarise_all(funs(sum)) %>%               # per-pattern missing count per column
arrange(desc(label)) %>%
dplyr::select(-label) %>%
mutate(rowid = do.call(pmax, (.))) %>%     # rows matching this pattern (row max)
gather('col','count', -rowid) %>%
mutate(Proportion = count / row_count
,rowid = round(rowid / row_count, 4)       # label each pattern by its proportion
)
# Tile plot: one row per missingness pattern, one column per variable.
ggplot(missing_plot_tbl) +
geom_tile(aes(x = col, y = as.factor(rowid), fill = Proportion), height = 0.8) +
scale_fill_continuous(labels = comma) +
scale_x_discrete(position = 'top') +
xlab("Variable") +
ylab("Missing Value Proportion") +
theme(axis.text.x = element_text(angle = 90))
This visualisation takes a little explaining.
Each row represents a combination of variables with simultaneous missing values. For each row in the graphic, the coloured entries show which particular variables are missing in that combination. The proportion of rows with that combination is displayed in both the label for the row and the colouring for the cells in the row.
With the raw data loaded up we now remove obvious unique or near-unique variables that are not amenable to basic exploration and plotting.
# Split the columns into continuous/discrete/datetime groups.
coltype_lst <- create_coltype_list(data_tbl)

if(!is.null(coltype_lst$split$discrete)) {
  # Count the distinct levels of each categorical variable, largest first.
  # across()/pivot_longer()/n_distinct() replace the deprecated
  # summarise_at()/gather()/length(unique()) idioms.
  catvar_valuecount_tbl <- data_tbl %>%
    summarise(across(all_of(coltype_lst$split$discrete), n_distinct)) %>%
    pivot_longer(everything()
                ,names_to  = 'var_name'
                ,values_to = 'level_count') %>%
    arrange(desc(level_count))

  print(catvar_valuecount_tbl)

  row_count <- nrow(data_tbl)

  cat(paste0("Dataset has ", row_count, " rows\n"))
} else {
  cat('No categorical variables in dataset\n')
}
## # A tibble: 10 x 2
## var_name level_count
## <chr> <int>
## 1 job 12
## 2 month 12
## 3 education 4
## 4 poutcome 4
## 5 marital 3
## 6 contact 3
## 7 default 2
## 8 housing 2
## 9 loan 2
## 10 y 2
## Dataset has 45211 rows
Now that we have a table of the counts of all the categorical variables we can automatically exclude unique variables from the exploration, as the level count will match the row count.
if(!is.null(coltype_lst$split$discrete)) {
  # A level count equal to the row count marks a unique-identifier variable.
  unique_vars <- catvar_valuecount_tbl %>%
    filter(level_count == row_count) %>%
    pull(var_name)

  print(unique_vars)

  # Exclude the identifier variables from the exploration dataset.
  explore_data_tbl <- data_tbl %>%
    dplyr::select(-one_of(unique_vars))
} else {
  explore_data_tbl <- data_tbl
}
## character(0)
Having removed the unique identifier variables from the dataset, we may also wish to exclude categoricals with high level counts also, so we create a vector of those variable names.
if(!is.null(coltype_lst$split$discrete)) {
  # Categoricals with many levels (but not unique identifiers) are candidates
  # for exclusion from the plots.
  highcount_vars <- catvar_valuecount_tbl %>%
    filter(level_count >= dataexp_level_exclusion_threshold,
           level_count < row_count) %>%
    pull(var_name)

  cat(paste0(highcount_vars, collapse = ', '))
} else {
  highcount_vars <- c()
}
We now can continue doing some basic exploration of the data. We may also choose to remove some extra columns from the dataset.
### You may want to comment out these next few lines to customise which
### categoricals are kept in the exploration.
drop_vars <- c(highcount_vars)

if(length(drop_vars) > 0) {
  explore_data_tbl <- explore_data_tbl %>%
    dplyr::select(-one_of(drop_vars))
  cat(paste0(drop_vars, collapse = ', '))
}

# Persist the cleaned dataset. 'file' replaces the deprecated 'path'
# argument of write_csv() (readr >= 1.4).
data_tbl %>% write_csv(file = 'data/loaded_data.csv')
Now that we have loaded the data we can prepare it for some basic data exploration. We first exclude the variables that are unique identifiers or similar, and then split the remaining variables out into various categories to help with the systematic data exploration.
# Re-derive the column-type split now that variables have been dropped.
coltype_lst <- create_coltype_list(explore_data_tbl)
print(coltype_lst)
## $split
## $split$continuous
## [1] "age" "balance" "day" "duration" "campaign" "pdays" "previous"
##
## $split$datetime
## [1] "date"
##
## $split$discrete
## [1] "job" "marital" "education" "default" "housing" "loan" "contact"
## [8] "month" "poutcome" "y"
##
##
## $columns
## age job marital education default balance
## "continuous" "discrete" "discrete" "discrete" "discrete" "continuous"
## housing loan contact day month duration
## "discrete" "discrete" "discrete" "continuous" "discrete" "continuous"
## campaign pdays previous poutcome y date
## "continuous" "continuous" "continuous" "discrete" "discrete" "datetime"
Logical variables only take two values: TRUE or FALSE. It is useful to see missing data as well though, so we also plot the count of those.
logical_vars <- coltype_lst$split$logical

# One barplot per logical variable, with the missing count in the title.
for(plot_varname in logical_vars) {
  cat("--\n")
  cat(paste0(plot_varname, '\n'))

  na_count <- explore_data_tbl[[plot_varname]] %>% is.na %>% sum

  # The .data pronoun replaces the deprecated aes_string().
  explore_plot <- ggplot(explore_data_tbl) +
    geom_bar(aes(x = .data[[plot_varname]])) +
    xlab(plot_varname) +
    ylab("Count") +
    scale_y_continuous(labels = comma) +
    ggtitle(paste0('Barplot of Counts for Variable: ', plot_varname
                  ,' (', na_count, ' missing values)')) +
    theme(axis.text.x = element_text(angle = 90, vjust = 0.5))

  plot(explore_plot)
}
Numeric variables are usually continuous in nature, though we also have integer data.
numeric_vars <- coltype_lst$split$continuous

# One histogram per numeric variable, with mean (red) and median (green)
# reference lines and a printed summary().
for(plot_varname in numeric_vars) {
  cat("--\n")
  cat(paste0(plot_varname, '\n'))

  plot_var <- explore_data_tbl[[plot_varname]]
  na_count <- plot_var %>% is.na %>% sum

  plot_var %>% summary %>% print

  # The .data pronoun replaces the deprecated aes_string().
  explore_plot <- ggplot(explore_data_tbl) +
    geom_histogram(aes(x = .data[[plot_varname]]), bins = dataexp_hist_bins_count) +
    geom_vline(xintercept = mean  (plot_var, na.rm = TRUE), colour = 'red',   size = 1.5) +
    geom_vline(xintercept = median(plot_var, na.rm = TRUE), colour = 'green', size = 1.5) +
    xlab(plot_varname) +
    ylab("Count") +
    scale_x_continuous(labels = comma) +
    scale_y_continuous(labels = comma) +
    ggtitle(paste0('Histogram Plot for Variable: ', plot_varname
                  ,' (', na_count, ' missing values)')
           ,subtitle = '(red line is mean, green line is median)')

  print(explore_plot)
}
## --
## age
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 18.0 33.0 39.0 40.9 48.0 95.0
## --
## balance
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## -8019 72 448 1362 1428 102127
## --
## day
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 1.0 8.0 16.0 15.8 21.0 31.0
## --
## duration
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 0 103 180 258 319 4918
## --
## campaign
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 1.00 1.00 2.00 2.76 3.00 63.00
## --
## pdays
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## -1.0 -1.0 -1.0 40.2 -1.0 871.0
## --
## previous
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 0.00 0.00 0.00 0.58 0.00 275.00
Categorical variables only have values from a limited, and usually fixed, number of possible values
categorical_vars <- coltype_lst$split$discrete
# One barplot per categorical variable, lumping rare levels into 'Other'.
for(plot_varname in categorical_vars) {
cat("--\n")
cat(paste0(plot_varname, '\n'))
na_count <- explore_data_tbl %>% .[[plot_varname]] %>% is.na %>% sum
# Keep the dataexp_cat_level_count most frequent levels, lump the rest,
# and count occurrences of each resulting level.
plot_tbl <- explore_data_tbl %>%
.[[plot_varname]] %>%
as.character %>%
fct_lump(n = dataexp_cat_level_count) %>%
fct_count
# Bars ordered by descending count; f/n are the columns fct_count produces.
explore_plot <- ggplot(plot_tbl) +
geom_bar(aes(x = fct_reorder(f, -n), weight = n)) +
xlab(plot_varname) +
ylab("Count") +
scale_y_continuous(labels = comma) +
ggtitle(paste0('Barplot of Counts for Variable: ', plot_varname
,' (', na_count, ' missing values)')) +
theme(axis.text.x = element_text(angle = 90, vjust = 0.5))
plot(explore_plot)
}
## --
## job
## --
## marital
## --
## education
## --
## default
## --
## housing
## --
## loan
## --
## contact
## --
## month
## --
## poutcome
## --
## y
Date/Time variables represent calendar or time-based data such as time of the day, a date, or a timestamp.
datetime_vars <- coltype_lst$split$datetime

# One histogram per date/time variable, with a printed summary().
for(plot_varname in datetime_vars) {
  cat("--\n")
  cat(paste0(plot_varname, '\n'))

  plot_var <- explore_data_tbl[[plot_varname]]
  na_count <- plot_var %>% is.na %>% sum

  plot_var %>% summary %>% print

  # The .data pronoun replaces the deprecated aes_string().
  explore_plot <- ggplot(explore_data_tbl) +
    geom_histogram(aes(x = .data[[plot_varname]]), bins = dataexp_hist_bins_count) +
    xlab(plot_varname) +
    ylab("Count") +
    scale_y_continuous(labels = comma) +
    ggtitle(paste0('Barplot of Dates/Times in Variable: ', plot_varname
                  ,' (', na_count, ' missing values)'))

  plot(explore_plot)
}
## --
## date
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## "2000-01-06" "2000-05-09" "2000-06-04" "2000-06-20" "2000-08-05" "2000-12-31"
We now move on to looking at bivariate plots of the data set.
Pairs plots are a very useful way of getting a quick idea of the relationships between variables in a data set.
Unfortunately, they do not scale well. Too many rows (say more than 5,000) can slow down the rendering, and more than 10 columns can make the plots uninterpretable as each cell is too small.
The technique is useful, so to circumvent these issues we sample the dataset. We select random columns and rows, and make a pairs plot of the subset, repeating this process for a number of iterations.
# Pairs plots do not scale, so we sample rows and columns repeatedly.
dataexp_pairsplot_itercount <- 3
dataexp_pairsplot_colcount <- 5
dataexp_pairsplot_rowcount <- 5000
# Only sample if the data exceeds either the row or the column budget.
if(ncol(data_tbl) > dataexp_pairsplot_colcount ||
nrow(data_tbl) > dataexp_pairsplot_rowcount) {
### Ugly hack to work around current dplyr bug for mutate_if
if(any(sapply(explore_data_tbl, is.logical))) {
conv_tbl <- explore_data_tbl %>%
mutate_if(is.logical, as.factor)
} else {
conv_tbl <- explore_data_tbl
}
# Lump character/factor columns down to at most 10 levels (9 + 'Other') so
# the pairs panels remain readable; all-NA columns are skipped.
conv_tbl <- conv_tbl %>%
mutate_if(function(x) (is.character(x) || is.factor(x)) && !all(is.na(x))
,function(x) fct_lump(x, n = 9))
# Draw several pairs plots, each on a fresh random row/column sample.
for(i in 1:dataexp_pairsplot_itercount) {
cat("--\n")
cat(paste0("Pairs plot iter: ", i, "\n"))
pairs_tbl <- conv_tbl %>%
create_ggpairs_tbl(sample_cols = dataexp_pairsplot_colcount
,sample_rows = dataexp_pairsplot_rowcount
)
cat(paste0("Columns: ", paste0(names(pairs_tbl), collapse = ', '), "\n"))
pairs_tbl %>%
ggpairs(cardinality_threshold = NULL
,lower = list(combo = wrap('facethist', bins = 25))
) %>%
print
}
} else {
ggpairs(data_tbl) %>% print
}
## --
## Pairs plot iter: 1
## Columns: default, contact, campaign, poutcome, y
## --
## Pairs plot iter: 2
## Columns: age, education, month, duration, poutcome
## --
## Pairs plot iter: 3
## Columns: marital, contact, pdays, previous, date
We want to look at how the variables split on the logical variables as this is a very natural way to observe the data.
### _TEMPLATE_
# Variable every bivariate plot below is faceted on.
facet_varname <- 'y'
facet_count_max <- 3
# Build the faceting formula once, e.g. ~ as.factor(y).
facet_formula <- formula(paste0("~ as.factor(", facet_varname, ")"))
For logical variables we facet on barplots of the levels, comparing TRUE, FALSE and missing data.
# Exclude the facet variable itself from the variables being plotted.
logical_vars <- logical_vars[!logical_vars %in% facet_varname]

for(plot_varname in logical_vars) {
  cat("--\n")
  cat(paste0(plot_varname, '\n'))

  # filter() with the .data pronoun replaces the deprecated
  # filter_() + formula idiom. The unused facet_count local was removed.
  plot_tbl <- data_tbl %>% filter(!is.na(.data[[plot_varname]]))

  explore_plot <- ggplot(plot_tbl) +
    geom_bar(aes(x = .data[[plot_varname]])) +
    facet_wrap(facet_formula, scales = 'free') +
    xlab(plot_varname) +
    ylab("Count") +
    scale_y_continuous(labels = comma) +
    ggtitle(paste0(facet_varname, '-Faceted Barplots for Variable: ', plot_varname)) +
    theme(axis.text.x = element_text(angle = 90, vjust = 0.5))

  plot(explore_plot)
}
For numeric variables, we facet on histograms of the data.
# Faceted histograms of each numeric variable, split on the facet variable.
for(plot_varname in numeric_vars) {
  cat("--\n")
  cat(paste0(plot_varname, '\n'))

  # filter() with the .data pronoun replaces the deprecated
  # filter_() + formula idiom. The unused facet_count local was removed.
  plot_tbl <- data_tbl %>% filter(!is.na(.data[[plot_varname]]))

  explore_plot <- ggplot(plot_tbl) +
    geom_histogram(aes(x = .data[[plot_varname]]), bins = dataexp_hist_bins_count) +
    facet_wrap(facet_formula, scales = 'free') +
    xlab(plot_varname) +
    ylab("Count") +
    scale_x_continuous(labels = comma) +
    scale_y_continuous(labels = comma) +
    ggtitle(paste0(facet_varname, '-Faceted Histogram for Variable: ', plot_varname)) +
    theme(axis.text.x = element_text(angle = 90, vjust = 0.5))

  print(explore_plot)
}
## --
## age
## --
## balance
## --
## day
## --
## duration
## --
## campaign
## --
## pdays
## --
## previous
We treat categorical variables like logical variables, faceting the barplots of the different levels of the data.
# Exclude the facet variable itself, then facet a barplot of every level.
categorical_vars <- categorical_vars[!categorical_vars %in% facet_varname]

for(plot_varname in categorical_vars) {
  cat("--\n")
  cat(paste0(plot_varname, '\n'))

  # filter() with the .data pronoun replaces the deprecated
  # filter_() + formula idiom. The unused facet_count local was removed.
  plot_tbl <- data_tbl %>% filter(!is.na(.data[[plot_varname]]))

  explore_plot <- ggplot(plot_tbl) +
    geom_bar(aes(x = .data[[plot_varname]])) +
    facet_wrap(facet_formula, scales = 'free') +
    xlab(plot_varname) +
    ylab("Count") +
    scale_y_continuous(labels = comma) +
    ggtitle(paste0(facet_varname, '-Faceted Histogram for Variable: ', plot_varname)) +
    theme(axis.text.x = element_text(angle = 90, vjust = 0.5))

  plot(explore_plot)
}
## --
## job
## --
## marital
## --
## education
## --
## default
## --
## housing
## --
## loan
## --
## contact
## --
## month
## --
## poutcome
Like the univariate plots, we facet on histograms of the years in the dates.
# Faceted barplots of the year of each date/time variable.
for(plot_varname in datetime_vars) {
  cat("--\n")
  cat(paste0(plot_varname, '\n'))

  # filter()/mutate() with the .data pronoun replace the deprecated
  # filter_()/mutate_() idioms. The unused facet_count local was removed.
  plot_tbl <- data_tbl %>%
    filter(!is.na(.data[[plot_varname]])) %>%
    mutate(plot_var = year(.data[[plot_varname]]))

  explore_plot <- ggplot(plot_tbl) +
    geom_bar(aes(x = plot_var)) +
    facet_wrap(facet_formula, scales = 'free') +
    xlab(plot_varname) +
    ylab("Count") +
    scale_y_continuous(labels = comma) +
    ggtitle(paste0(facet_varname, '-Faceted Histogram for Variable: ', plot_varname))

  plot(explore_plot)
}
## --
## date
Having looked at the pairs plots we also look at multivariate plots of all the data. We do this using techniques known as ‘multidimensional scaling’ or MDS.
Many of these techniques do not scale well beyond a few thousand data points, so we repeat our sampling trick as before and create multiple plots from these samples.
# Refresh the numeric variable list from the reduced exploration dataset.
numeric_vars <- create_coltype_list(explore_data_tbl)$split$continuous
We start with classic multidimensional scaling, also called ‘principal coordinates analysis’, which is done in R via the function cmdscale.
# Number of MDS samples to draw and the rows per sample.
mds_iter_count <- 4
mds_sample_count <- 2000
# Logical mask of rows with no missing numeric values (MDS needs complete cases).
row_ids <- data_tbl %>%
dplyr::select(one_of(numeric_vars)) %>%
complete.cases
### _TEMPLATE_
### Choosing the first variable in the categorical list by default. You probably
### want to change that.
colour_var <- categorical_vars[1]
# Complete-case subset of the numeric variables plus the colouring variable.
input_tbl <- data_tbl %>%
dplyr::select(one_of(c(numeric_vars, colour_var))) %>%
filter(row_ids)
# Project a sample onto two classical-MDS dimensions and return a scatter
# plot coloured by colour_var (captured from the enclosing environment).
construct_mds_plot <- function(mds_tbl) {
  # Pairwise Euclidean distances over the numeric columns only.
  dist_mat <- mds_tbl %>% dplyr::select(one_of(numeric_vars)) %>% dist

  # Classical MDS (principal coordinates analysis) down to two dimensions.
  mds_fit <- cmdscale(dist_mat, k = 2, eig = TRUE)

  # Attach the two projected coordinates to the sample.
  mds_tbl <- mds_tbl %>%
    mutate(mds_d1 = mds_fit$points[, 1]
          ,mds_d2 = mds_fit$points[, 2])

  mds_plot <- ggplot(mds_tbl) +
    geom_point(aes_string(x = 'mds_d1', y = 'mds_d2', colour = colour_var)) +
    xlab("MDS Dim 1") +
    ylab("MDS Dim 2")

  return(mds_plot)
}
# Build one MDS plot per sample and print each in turn.
mds_lst <- create_sampled_output(input_tbl, construct_mds_plot, mds_sample_count, mds_iter_count)

# seq_along() is safe for an empty list (1:length() would iterate c(1, 0)).
for(i in seq_along(mds_lst)) {
  cat("--\n")
  cat(paste0("MDS plot iter: ", i, "\n"))
  print(mds_lst[[i]])
}
## --
## MDS plot iter: 1
## --
## MDS plot iter: 2
## --
## MDS plot iter: 3
## --
## MDS plot iter: 4
One standard method for doing this is t-SNE, t-distributed Stochastic Neighbourhood Embedding. This algorithm is a type of dimensionality reduction - it constructs a lower-dimensional set of data from the original dataset by attempting to minimise the Kullback-Leibler divergence between the original and target datasets.
t-SNE requires unique datapoints, so to ensure we do not pass repeated rows at any point, we may add a small amount of noise to the numeric columns to ensure uniqueness - t-SNE is a probabilistic process so this should not affect our output very much.
As with previous methods, we take samples from larger datasets and plot outputs from multiple samples.
# Number of t-SNE samples to draw and the rows per sample.
tsne_iter_count <- 4
tsne_sample_count <- 5000
# Logical mask of rows with no missing numeric values.
row_ids <- data_tbl %>%
dplyr::select(one_of(numeric_vars)) %>%
complete.cases
### _TEMPLATE_
### Choosing the first variable in the categorical list by default. You probably
### want to change that.
colour_var <- categorical_vars[1]
# Complete-case subset with a small amount of noise added to the numeric
# columns, since t-SNE requires unique data points.
input_tbl <- data_tbl %>%
dplyr::select(one_of(c(numeric_vars, colour_var))) %>%
jitter_numeric_variables %>%
filter(row_ids)
# Embed a sample into two t-SNE dimensions and return a scatter plot
# coloured by colour_var (captured from the enclosing environment).
construct_tsne_plot <- function(tsne_tbl) {
  # Run t-SNE on the numeric columns only.
  tsne_fit <- Rtsne(tsne_tbl %>% dplyr::select(one_of(numeric_vars)))

  # Attach the two embedding coordinates to the sample.
  tsne_tbl$tsne_d1 <- tsne_fit$Y[, 1]
  tsne_tbl$tsne_d2 <- tsne_fit$Y[, 2]

  tsne_plot <- ggplot(tsne_tbl) +
    geom_point(aes_string(x = 'tsne_d1', y = 'tsne_d2', colour = colour_var)
              ,size = 0.5) +
    xlab("t-SNE Dim 1") +
    ylab("t-SNE Dim 2")

  return(tsne_plot)
}
# Build one t-SNE plot per sample and print each in turn.
tsne_lst <- create_sampled_output(input_tbl, construct_tsne_plot, tsne_sample_count, tsne_iter_count)

# seq_along() is safe for an empty list (1:length() would iterate c(1, 0)).
for(i in seq_along(tsne_lst)) {
  cat("--\n")
  cat(paste0("t-SNE plot iter: ", i, "\n"))
  print(tsne_lst[[i]])
}
## --
## t-SNE plot iter: 1
## --
## t-SNE plot iter: 2
## --
## t-SNE plot iter: 3
## --
## t-SNE plot iter: 4
Another important part of data exploration is the identification of possible outliers, and we approach this in multiple ways.
In keeping with the methodical approach we start with a univariate perspective, looking at each numerical variable by itself and plotting the values in the variable both with and without the outliers.
# Side-by-side histograms of each numeric variable: all data vs outliers removed.
for(plot_varname in numeric_vars) {
cat("--\n")
cat(paste0(plot_varname, '\n'))
plot_var <- data_tbl %>% .[[plot_varname]]
# NOTE(review): identify_univariate_outliers() is defined elsewhere. If its
# name is accurate and it flags the OUTLIER entries, then the subset below
# keeps only the outliers and should read plot_var[!outlier_point] to match
# the "No Outliers" title — confirm the helper's return convention.
outlier_point <- identify_univariate_outliers(plot_var)
no_outlier_vals <- plot_var[outlier_point]
# Histogram of the full variable.
all_plot <- ggplot() +
geom_histogram(aes(x = plot_var), bins = dataexp_hist_bins_count) +
xlab(plot_varname) +
ylab("Count") +
scale_x_continuous(labels = comma) +
scale_y_continuous(labels = comma) +
ggtitle("All Data") +
theme(axis.text.x = element_text(angle = 90, vjust = 0.5))
# Histogram of the outlier-filtered values.
no_outlier_plot <- ggplot() +
geom_histogram(aes(x = no_outlier_vals), bins = dataexp_hist_bins_count) +
xlab(plot_varname) +
ylab("Count") +
scale_x_continuous(labels = comma) +
scale_y_continuous(labels = comma) +
ggtitle("No Outliers") +
theme(axis.text.x = element_text(angle = 90, vjust = 0.5))
plot_grid(all_plot, no_outlier_plot, ncol = 2) %>% print
}
## --
## age
## --
## balance
## --
## day
## --
## duration
## --
## campaign
## --
## pdays
## --
## previous
We use the above plots to decide if we need to remove certain extreme values from the dataset.
# We place basic logic for identifying univariate outliers here
#data_filt_tbl <- data_tbl %>% mutate(uni_outlier = ifelse(val > 100))